# This is a BitKeeper generated patch for the following project:
# Project Name: Linux kernel tree
# This patch format is intended for GNU patch command version 2.5 or higher.
# This patch includes the following deltas:
#	           ChangeSet	1.1069.1.103+1.1069.51.1 -> 1.1069.52.1
#	include/linux/sysctl.h	1.23.1.12 -> 1.31.1.1
#	drivers/char/drm-4.0/radeon_drv.c	1.1.1.1 -> 1.3    
#	  include/linux/mm.h	1.39.1.5 -> 1.48   
#	drivers/char/drm-4.0/bufs.c	1.1.1.1 -> 1.3    
#	     mm/page_alloc.c	1.56.1.8 -> 1.62.1.1
#	drivers/char/drm-4.0/tdfx_drv.c	1.2.1.1 -> 1.4    
#	     kernel/sysctl.c	1.19.1.9 -> 1.23.1.3
#	 fs/proc/proc_misc.c	1.20.1.5 -> 1.23.1.3
#	drivers/char/drm/r128_cce.c	1.5.1.2 -> 1.11   
#	drivers/char/drm-4.0/i810_drv.c	1.1.1.1 -> 1.3    
#	drivers/char/drm/drm_memory.h	1.3.1.2 -> 1.7    
#	         init/main.c	1.27.1.3 -> 1.31   
#	drivers/char/drm/radeon_cp.c	1.6.1.2 -> 1.11   
#	drivers/char/drm/drm_bufs.h	1.5.1.2 -> 1.8    
#	  include/linux/fs.h	1.69.1.21 -> 1.76.1.4
#	drivers/scsi/Makefile	1.19.1.2 -> 1.20.1.3
#	drivers/char/drm/mga_dma.c	1.4.1.3 -> 1.8    
#	drivers/char/drm/i810_dma.c	1.7.1.3 -> 1.9.1.2
#	drivers/acpi/system.c	1.13.1.1 -> 1.15   
#	drivers/char/drm-4.0/r128_drv.c	1.1.1.1 -> 1.3    
#	drivers/scsi/Config.in	1.15.1.6 -> 1.18.1.4
#	drivers/char/drm-4.0/drmP.h	1.1.1.1 -> 1.3    
#	         mm/memory.c	1.51.1.6 -> 1.54.1.7
#	drivers/scsi/ql12160_fw.h	1.1.1.1 -> 1.3    
#	drivers/char/drm-4.0/gamma_drv.c	1.1.1.1 -> 1.3    
#	drivers/char/drm-4.0/mga_dma.c	1.1.1.1 -> 1.3    
#	      fs/proc/base.c	1.12.1.5 -> 1.14.1.4
#	drivers/char/drm-4.0/ffb_drv.c	1.3.1.1 -> 1.5    
#	drivers/char/drm-4.0/i810_dma.c	1.3.1.3 -> 1.6.1.1
#	drivers/char/drm-4.0/r128_cce.c	1.1.1.1 -> 1.5    
#	drivers/scsi/ql1280_fw.h	1.1.1.1 -> 1.3    
#	drivers/char/drm/drmP.h	1.9.1.2 -> 1.13   
#	drivers/char/drm-4.0/mga_drv.c	1.1.1.1 -> 1.3    
#	drivers/char/drm-4.0/radeon_cp.c	1.1.1.1 -> 1.4    
#	drivers/char/drm/drm_drv.h	1.4.1.2 -> 1.7    
#	drivers/acpi/Config.in	1.9.1.6 -> 1.12.1.4
#	drivers/char/drm-4.0/memory.c	1.1.1.1 -> 1.3    
#	Documentation/Configure.help	1.162.1.41 -> 1.166.1.16
#	drivers/scsi/qla1280.h	1.2.1.1 -> 1.4    
#	drivers/scsi/qla1280.c	1.6.1.1 -> 1.8    
#	drivers/char/drm/drm_vm.h	1.12.1.3 -> 1.16   
#
diff -Nru a/Documentation/Configure.help b/Documentation/Configure.help
--- a/Documentation/Configure.help	Thu Oct  9 15:19:46 2003
+++ b/Documentation/Configure.help	Thu Oct  9 15:19:46 2003
@@ -18209,6 +18209,11 @@
   purpose port, say Y here. See
   <http://www.dig64.org/specifications/DIG64_HCDPv10a_01.pdf>.
 
+Support for serial ports defined in ACPI namespace
+CONFIG_SERIAL_ACPI
+  If you wish to enable serial port discovery via the ACPI
+  namespace, say Y here.  If unsure, say N.
+
 Support for PowerMac serial ports
 CONFIG_MAC_SERIAL
   If you have Macintosh style serial ports (8 pin mini-DIN), say Y
diff -Nru a/drivers/acpi/system.c b/drivers/acpi/system.c
--- a/drivers/acpi/system.c	Thu Oct  9 15:19:45 2003
+++ b/drivers/acpi/system.c	Thu Oct  9 15:19:45 2003
@@ -92,7 +92,13 @@
 static void
 acpi_power_off (void)
 {
+#ifdef CONFIG_X86
 	acpi_suspend(ACPI_STATE_S5);
+#else
+	acpi_enter_sleep_state_prep(ACPI_STATE_S5);
+	ACPI_DISABLE_IRQS();
+	acpi_enter_sleep_state(ACPI_STATE_S5);
+#endif
 }
 
 #endif /*CONFIG_PM*/
diff -Nru a/drivers/char/drm/drm_bufs.h b/drivers/char/drm/drm_bufs.h
--- a/drivers/char/drm/drm_bufs.h	Thu Oct  9 15:19:45 2003
+++ b/drivers/char/drm/drm_bufs.h	Thu Oct  9 15:19:45 2003
@@ -106,7 +106,7 @@
 	switch ( map->type ) {
 	case _DRM_REGISTERS:
 	case _DRM_FRAME_BUFFER:
-#if !defined(__sparc__) && !defined(__alpha__)
+#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__)
 		if ( map->offset + map->size < map->offset ||
 		     map->offset < virt_to_phys(high_memory) ) {
 			DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
diff -Nru a/drivers/char/drm/drm_memory.h b/drivers/char/drm/drm_memory.h
--- a/drivers/char/drm/drm_memory.h	Thu Oct  9 15:19:45 2003
+++ b/drivers/char/drm/drm_memory.h	Thu Oct  9 15:19:45 2003
@@ -293,6 +293,11 @@
 void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev)
 {
 	void *pt;
+#if __REALLY_HAVE_AGP
+	drm_map_t *map = NULL;
+	drm_map_list_t *r_list;
+	struct list_head *list;
+#endif
 
 	if (!size) {
 		DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
@@ -300,12 +305,50 @@
 		return NULL;
 	}
 
+#if __REALLY_HAVE_AGP
+	if (!dev->agp || dev->agp->cant_use_aperture == 0)
+		goto standard_ioremap;
+
+	list_for_each(list, &dev->maplist->head) {
+		r_list = (drm_map_list_t *)list;
+		map = r_list->map;
+		if (!map) continue;
+		if (map->offset <= offset &&
+			(map->offset + map->size) >= (offset + size))
+			break;
+	}
+
+	if (map && map->type == _DRM_AGP) {
+		struct drm_agp_mem *agpmem;
+
+		for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
+			if (agpmem->bound <= offset &&
+			   (agpmem->bound + (agpmem->pages
+				<< PAGE_SHIFT)) >= (offset + size))
+				break;
+		}
+
+		if (agpmem == NULL)
+			goto ioremap_failure;
+
+		pt = agpmem->memory->vmptr + (offset - agpmem->bound);
+		goto ioremap_success;
+	}
+
+standard_ioremap:
+#endif
 	if (!(pt = ioremap(offset, size))) {
+#if __REALLY_HAVE_AGP
+ioremap_failure:
+#endif
 		spin_lock(&DRM(mem_lock));
 		++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
 		spin_unlock(&DRM(mem_lock));
 		return NULL;
 	}
+#if __REALLY_HAVE_AGP
+ioremap_success:
+#endif
 	spin_lock(&DRM(mem_lock));
 	++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
 	DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size;
@@ -316,6 +359,11 @@
 void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size, drm_device_t *dev)
 {
 	void *pt;
+#if __REALLY_HAVE_AGP
+	drm_map_t *map = NULL;
+	drm_map_list_t *r_list;
+	struct list_head *list;
+#endif
 
 	if (!size) {
 		DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
@@ -323,12 +371,50 @@
 		return NULL;
 	}
 
+#if __REALLY_HAVE_AGP
+	if (!dev->agp || dev->agp->cant_use_aperture == 0)
+		goto standard_ioremap;
+
+	list_for_each(list, &dev->maplist->head) {
+		r_list = (drm_map_list_t *)list;
+		map = r_list->map;
+		if (!map) continue;
+		if (map->offset <= offset &&
+			(map->offset + map->size) >= (offset + size))
+			break;
+	}
+
+	if (map && map->type == _DRM_AGP) {
+		struct drm_agp_mem *agpmem;
+
+		for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
+			if (agpmem->bound <= offset &&
+			   (agpmem->bound + (agpmem->pages
+				<< PAGE_SHIFT)) >= (offset + size))
+				break;
+		}
+
+		if (agpmem == NULL)
+			goto ioremap_failure;
+
+		pt = agpmem->memory->vmptr + (offset - agpmem->bound);
+		goto ioremap_success;
+	}
+
+standard_ioremap:
+#endif
 	if (!(pt = ioremap_nocache(offset, size))) {
+#if __REALLY_HAVE_AGP
+ioremap_failure:
+#endif
 		spin_lock(&DRM(mem_lock));
 		++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
 		spin_unlock(&DRM(mem_lock));
 		return NULL;
 	}
+#if __REALLY_HAVE_AGP
+ioremap_success:
+#endif
 	spin_lock(&DRM(mem_lock));
 	++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
 	DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size;
@@ -344,7 +430,11 @@
 	if (!pt)
 		DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
 			      "Attempt to free NULL pointer\n");
+#if __REALLY_HAVE_AGP
+	else if (!dev->agp || dev->agp->cant_use_aperture == 0)
+#else
 	else
+#endif
 		iounmap(pt);
 
 	spin_lock(&DRM(mem_lock));
diff -Nru a/drivers/char/drm/drm_vm.h b/drivers/char/drm/drm_vm.h
--- a/drivers/char/drm/drm_vm.h	Thu Oct  9 15:19:46 2003
+++ b/drivers/char/drm/drm_vm.h	Thu Oct  9 15:19:46 2003
@@ -368,6 +368,7 @@
 	drm_map_list_t  *r_list;
 	unsigned long   offset  = 0;
 	struct list_head *list;
+	struct page 	*page;
 
 	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
 		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));
@@ -414,28 +415,30 @@
 
 	switch (map->type) {
         case _DRM_AGP:
-#if defined(__alpha__)
-                /*
-                 * On Alpha we can't talk to bus dma address from the
-                 * CPU, so for memory of type DRM_AGP, we'll deal with
-                 * sorting out the real physical pages and mappings
-                 * in nopage()
-                 */
-                vma->vm_ops = &DRM(vm_ops);
-                break;
+#if __REALLY_HAVE_AGP
+		if (dev->agp->cant_use_aperture) {
+                	/*
+                	 * On some systems we can't talk to bus dma address from
+                	 * the CPU, so for memory of type DRM_AGP, we'll deal
+                	 * with sorting out the real physical pages and mappings
+                	 * in nopage()
+                	 */
+                	vma->vm_ops = &DRM(vm_ops);
+			goto mapswitch_out;
+		}
 #endif
                 /* fall through to _DRM_FRAME_BUFFER... */        
 	case _DRM_FRAME_BUFFER:
 	case _DRM_REGISTERS:
-		if (VM_OFFSET(vma) >= __pa(high_memory)) {
+		page = virt_to_page(__va(VM_OFFSET(vma)));
+		if (!VALID_PAGE(page) || PageReserved(page)) {
 #if defined(__i386__) || defined(__x86_64__)
 			if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
 				pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
 				pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
 			}
 #elif defined(__ia64__)
-			if (map->type != _DRM_AGP)
-				vma->vm_page_prot =
+			vma->vm_page_prot =
 					pgprot_writecombine(vma->vm_page_prot);
 #elif defined(__powerpc__)
 			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE | _PAGE_GUARDED;
@@ -474,6 +477,9 @@
 	default:
 		return -EINVAL;	/* This should never happen. */
 	}
+#if __REALLY_HAVE_AGP
+mapswitch_out:
+#endif
 	vma->vm_flags |= VM_RESERVED; /* Don't swap */
 
 	vma->vm_file  =	 filp;	/* Needed for drm_vm_open() */
diff -Nru a/drivers/char/drm/r128_cce.c b/drivers/char/drm/r128_cce.c
--- a/drivers/char/drm/r128_cce.c	Thu Oct  9 15:19:45 2003
+++ b/drivers/char/drm/r128_cce.c	Thu Oct  9 15:19:45 2003
@@ -216,7 +216,22 @@
 	int i;
 
 	for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
+#ifndef CONFIG_AGP_I460
 		if ( GET_RING_HEAD( &dev_priv->ring ) == dev_priv->ring.tail ) {
+#else
+		/*
+		 * XXX - this is (I think) a 460GX specific hack
+		 *
+		 * When doing texturing, ring.tail sometimes gets ahead of
+		 * PM4_BUFFER_DL_WPTR by 2; consequently, the card processes
+		 * its whole quota of instructions and *ring.head is still 2
+		 * short of ring.tail.  Work around this for now in lieu of
+		 * a better solution.
+		 */
+		if ( GET_RING_HEAD( &dev_priv->ring ) == dev_priv->ring.tail ||
+			( dev_priv->ring.tail -
+				GET_RING_HEAD( &dev_priv->ring ) ) == 2 ) {
+#endif
 			int pm4stat = R128_READ( R128_PM4_STAT );
 			if ( ( (pm4stat & R128_PM4_FIFOCNT_MASK) >=
 			       dev_priv->cce_fifo_size ) &&
@@ -317,7 +332,7 @@
 static void r128_cce_init_ring_buffer( drm_device_t *dev,
 				       drm_r128_private_t *dev_priv )
 {
-	u32 ring_start;
+	u32 ring_start, rptr_addr;
 	u32 tmp;
 
 	DRM_DEBUG( "\n" );
@@ -341,8 +356,24 @@
 	SET_RING_HEAD( &dev_priv->ring, 0 );
 
 	if ( !dev_priv->is_pci ) {
-		R128_WRITE( R128_PM4_BUFFER_DL_RPTR_ADDR,
-			    dev_priv->ring_rptr->offset );
+		/*
+		 * 460GX doesn't claim PCI writes from the card into
+		 * the AGP aperture, so we have to get space outside
+		 * the aperture for RPTR_ADDR.
+		 */
+		if ( dev->agp->agp_info.chipset == INTEL_460GX ) {
+			unsigned long alt_rh_off;
+
+			alt_rh_off = __get_free_page(GFP_KERNEL | GFP_DMA);
+			atomic_inc(&virt_to_page(alt_rh_off)->count);
+			set_bit(PG_locked, &virt_to_page(alt_rh_off)->flags);
+
+			dev_priv->ring.head = (__volatile__ u32 *) alt_rh_off;
+			SET_RING_HEAD( &dev_priv->ring, 0 );
+			rptr_addr = __pa( dev_priv->ring.head );
+		} else
+			rptr_addr = dev_priv->ring_rptr->offset;
+		R128_WRITE( R128_PM4_BUFFER_DL_RPTR_ADDR, rptr_addr );
 	} else {
 		drm_sg_mem_t *entry = dev->sg;
 		unsigned long tmp_ofs, page_ofs;
@@ -629,7 +660,19 @@
 				DRM_ERROR( "failed to cleanup PCI GART!\n" );
 		}
 #endif
-
+		/*
+		 * Free the page we grabbed for RPTR_ADDR
+		 */
+		if ( !dev_priv->is_pci && dev->agp->agp_info.chipset == INTEL_460GX ) {
+			unsigned long alt_rh_off =
+				(unsigned long) dev_priv->ring.head;
+			struct page *p = virt_to_page((void *)alt_rh_off);
+
+			put_page(p);
+			unlock_page(p);
+			free_page(alt_rh_off);
+		}
+
 		DRM(free)( dev->dev_private, sizeof(drm_r128_private_t),
 			   DRM_MEM_DRIVER );
 		dev->dev_private = NULL;
diff -Nru a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c
--- a/drivers/char/drm/radeon_cp.c	Thu Oct  9 15:19:45 2003
+++ b/drivers/char/drm/radeon_cp.c	Thu Oct  9 15:19:45 2003
@@ -854,7 +854,7 @@
 static void radeon_cp_init_ring_buffer( drm_device_t *dev,
 				        drm_radeon_private_t *dev_priv )
 {
-	u32 ring_start, cur_read_ptr;
+	u32 ring_start, cur_read_ptr, rptr_addr;
 	u32 tmp;
 
 	/* Initialize the memory controller */
@@ -892,8 +892,24 @@
 	dev_priv->ring.tail = cur_read_ptr;
 
 	if ( !dev_priv->is_pci ) {
-		RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR,
-			      dev_priv->ring_rptr->offset );
+		/*
+		 * 460GX doesn't claim PCI writes from the card into
+		 * the AGP aperture, so we have to get space outside
+		 * the aperture for RPTR_ADDR.
+		 */
+		if ( dev->agp->agp_info.chipset == INTEL_460GX ) {
+			unsigned long alt_rh_off;
+
+			alt_rh_off = __get_free_page(GFP_KERNEL | GFP_DMA);
+			atomic_inc(&virt_to_page(alt_rh_off)->count);
+			set_bit(PG_locked, &virt_to_page(alt_rh_off)->flags);
+
+			dev_priv->ring.head = (__volatile__ u32 *) alt_rh_off;
+			*dev_priv->ring.head = cur_read_ptr;
+			rptr_addr = __pa( dev_priv->ring.head );
+		} else
+			rptr_addr = dev_priv->ring_rptr->offset;
+		RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR, rptr_addr );
 	} else {
 		drm_sg_mem_t *entry = dev->sg;
 		unsigned long tmp_ofs, page_ofs;
@@ -1278,6 +1294,19 @@
 #endif /* __REALLY_HAVE_SG */
 		}
 
+		/*
+		 * Free the page we grabbed for RPTR_ADDR
+		 */
+		if ( !dev_priv->is_pci && dev->agp->agp_info.chipset == INTEL_460GX ) {
+			unsigned long alt_rh_off =
+				(unsigned long) dev_priv->ring.head;
+			struct page *p = virt_to_page((void *)alt_rh_off);
+
+			put_page(p);
+			unlock_page(p);
+			free_page(alt_rh_off);
+		}
+
 		DRM(free)( dev->dev_private, sizeof(drm_radeon_private_t),
 			   DRM_MEM_DRIVER );
 		dev->dev_private = NULL;
diff -Nru a/drivers/char/drm-4.0/bufs.c b/drivers/char/drm-4.0/bufs.c
--- a/drivers/char/drm-4.0/bufs.c	Thu Oct  9 15:19:45 2003
+++ b/drivers/char/drm-4.0/bufs.c	Thu Oct  9 15:19:45 2003
@@ -73,7 +73,7 @@
 	switch (map->type) {
 	case _DRM_REGISTERS:
 	case _DRM_FRAME_BUFFER:
-#ifndef __sparc__
+#if !defined(__sparc__) && !defined(__ia64__)
 		if (map->offset + map->size < map->offset
 		    || map->offset < virt_to_phys(high_memory)) {
 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
diff -Nru a/drivers/char/drm-4.0/drmP.h b/drivers/char/drm-4.0/drmP.h
--- a/drivers/char/drm-4.0/drmP.h	Thu Oct  9 15:19:45 2003
+++ b/drivers/char/drm-4.0/drmP.h	Thu Oct  9 15:19:45 2003
@@ -510,6 +510,8 @@
 	int                acquired;
 	unsigned long      base;
    	int 		   agp_mtrr;
+	int		   cant_use_aperture;
+	unsigned long	   page_mask;
 } drm_agp_head_t;
 #endif
 
diff -Nru a/drivers/char/drm-4.0/memory.c b/drivers/char/drm-4.0/memory.c
--- a/drivers/char/drm-4.0/memory.c	Thu Oct  9 15:19:45 2003
+++ b/drivers/char/drm-4.0/memory.c	Thu Oct  9 15:19:45 2003
@@ -306,12 +306,44 @@
 		return NULL;
 	}
 	
+	if (dev->agp->cant_use_aperture) {
+		drm_map_t *map    = NULL;
+		int i;
+
+		for (i = 0; i < dev->map_count; i++) {
+			map = dev->maplist[i];
+			if (!map) continue;
+			if (map->offset <= offset &&
+				(map->offset + map->size) >= (offset + size))
+				break;
+		}
+		
+		if (map && map->type == _DRM_AGP) {
+			struct drm_agp_mem *agpmem;
+
+			for (agpmem = dev->agp->memory; agpmem;
+						agpmem = agpmem->next) {
+				if(agpmem->bound <= offset &&
+				   (agpmem->bound + (agpmem->pages
+					<< PAGE_SHIFT)) >= (offset + size))
+					break;
+			}
+
+			if (agpmem) {
+				pt = agpmem->memory->vmptr + (offset - agpmem->bound);
+				goto ioremap_success;
+			}
+		}
+	}
+
 	if (!(pt = ioremap(offset, size))) {
 		spin_lock(&drm_mem_lock);
 		++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
 		spin_unlock(&drm_mem_lock);
 		return NULL;
 	}
+
+ioremap_success:
 	spin_lock(&drm_mem_lock);
 	++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
 	drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
@@ -327,7 +359,7 @@
 	if (!pt)
 		DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
 			      "Attempt to free NULL pointer\n");
-	else
+	else if (dev->agp->cant_use_aperture == 0)
 		iounmap(pt);
 	
 	spin_lock(&drm_mem_lock);
diff -Nru a/drivers/char/drm-4.0/mga_dma.c b/drivers/char/drm-4.0/mga_dma.c
--- a/drivers/char/drm-4.0/mga_dma.c	Thu Oct  9 15:19:45 2003
+++ b/drivers/char/drm-4.0/mga_dma.c	Thu Oct  9 15:19:45 2003
@@ -741,10 +741,18 @@
 		return -ENOMEM;
 	}
 
-   	/* Write status page when secend or softrap occurs */
+   	/* Write status page when secend or softrap occurs
+	 *
+	 * Disable this on ia64 on the off chance that the real status page
+	 * will be above 4GB.
+	 */
+#if defined(__ia64__)
+   	MGA_WRITE(MGAREG_PRIMPTR,
+		  virt_to_bus((void *)dev_priv->real_status_page));
+#else
    	MGA_WRITE(MGAREG_PRIMPTR,
 		  virt_to_bus((void *)dev_priv->real_status_page) | 0x00000003);
-
+#endif
 
 	/* Private is now filled in, initialize the hardware */
 	{
diff -Nru a/drivers/char/drm-4.0/r128_cce.c b/drivers/char/drm-4.0/r128_cce.c
--- a/drivers/char/drm-4.0/r128_cce.c	Thu Oct  9 15:19:45 2003
+++ b/drivers/char/drm-4.0/r128_cce.c	Thu Oct  9 15:19:45 2003
@@ -229,7 +229,21 @@
 	int i;
 
 	for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
+#ifndef CONFIG_AGP_I460
 		if ( *dev_priv->ring.head == dev_priv->ring.tail ) {
+#else
+		/*
+		 * XXX - this is (I think) a 460GX specific hack
+		 *
+		 * When doing texturing, ring.tail sometimes gets ahead of
+		 * PM4_BUFFER_DL_WPTR by 2; consequently, the card processes
+		 * its whole quota of instructions and *ring.head is still 2
+		 * short of ring.tail.  Work around this for now in lieu of
+		 * a better solution.
+		 */
+		if ( (*dev_priv->ring.head == dev_priv->ring.tail) ||
+			((dev_priv->ring.tail - *dev_priv->ring.head) == 2) ) {
+#endif
 			int pm4stat = R128_READ( R128_PM4_STAT );
 			if ( ( (pm4stat & R128_PM4_FIFOCNT_MASK) >=
 			       dev_priv->cce_fifo_size ) &&
@@ -330,7 +344,7 @@
 static void r128_cce_init_ring_buffer( drm_device_t *dev )
 {
 	drm_r128_private_t *dev_priv = dev->dev_private;
-	u32 ring_start;
+	u32 ring_start, rptr_addr;
 	u32 tmp;
 
 	/* The manual (p. 2) says this address is in "VM space".  This
@@ -342,10 +356,27 @@
 	R128_WRITE( R128_PM4_BUFFER_DL_WPTR, 0 );
 	R128_WRITE( R128_PM4_BUFFER_DL_RPTR, 0 );
 
+	/*
+	 * 460GX doesn't claim PCI writes from the card into the AGP
+	 * aperture, so we have to get space outside the aperture for
+	 * RPTR_ADDR.
+	 */
+	if ( dev->agp->agp_info.chipset == INTEL_460GX ) {
+		unsigned long alt_rh_off;
+
+		alt_rh_off = __get_free_page(GFP_KERNEL | GFP_DMA);
+		atomic_inc(&virt_to_page(alt_rh_off)->count);
+		set_bit(PG_locked, &virt_to_page(alt_rh_off)->flags);
+
+		dev_priv->ring.head = (__volatile__ u32 *) alt_rh_off;
+		rptr_addr = __pa( dev_priv->ring.head );
+	} else {
+		rptr_addr = dev_priv->ring_rptr->offset;
+	}
+
 	/* DL_RPTR_ADDR is a physical address in AGP space. */
 	*dev_priv->ring.head = 0;
-	R128_WRITE( R128_PM4_BUFFER_DL_RPTR_ADDR,
-		    dev_priv->ring_rptr->offset );
+	R128_WRITE( R128_PM4_BUFFER_DL_RPTR_ADDR, rptr_addr );
 
 	/* Set watermark control */
 	R128_WRITE( R128_PM4_BUFFER_WM_CNTL,
@@ -529,6 +560,19 @@
 			DO_REMAPFREE( dev_priv->agp_textures, dev );
 		}
 #endif
+
+		/*
+		 * Free the page we grabbed for RPTR_ADDR
+		 */
+		if ( dev->agp->agp_info.chipset == INTEL_460GX ) {
+			unsigned long alt_rh_off =
+				(unsigned long) dev_priv->ring.head;
+
+			atomic_dec(&virt_to_page(alt_rh_off)->count);
+			clear_bit(PG_locked, &virt_to_page(alt_rh_off)->flags);
+			wake_up(&virt_to_page(alt_rh_off)->wait);
+			free_page(alt_rh_off);
+		}
 
 		drm_free( dev->dev_private, sizeof(drm_r128_private_t),
 			  DRM_MEM_DRIVER );
diff -Nru a/drivers/char/drm-4.0/radeon_cp.c b/drivers/char/drm-4.0/radeon_cp.c
--- a/drivers/char/drm-4.0/radeon_cp.c	Thu Oct  9 15:19:45 2003
+++ b/drivers/char/drm-4.0/radeon_cp.c	Thu Oct  9 15:19:45 2003
@@ -569,7 +569,7 @@
 static void radeon_cp_init_ring_buffer( drm_device_t *dev )
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
-	u32 ring_start, cur_read_ptr;
+	u32 ring_start, cur_read_ptr, rptr_addr;
 	u32 tmp;
 
 	/* Initialize the memory controller */
@@ -592,10 +592,29 @@
 	/* Initialize the ring buffer's read and write pointers */
 	cur_read_ptr = RADEON_READ( RADEON_CP_RB_RPTR );
 	RADEON_WRITE( RADEON_CP_RB_WPTR, cur_read_ptr );
+
 	*dev_priv->ring.head = cur_read_ptr;
 	dev_priv->ring.tail = cur_read_ptr;
 
-	RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR, dev_priv->ring_rptr->offset );
+	/*
+	 * 460GX doesn't claim PCI writes from the card into the AGP
+	 * aperture, so we have to get space outside the aperture for
+	 * RPTR_ADDR.
+	 */
+	if ( dev->agp->agp_info.chipset == INTEL_460GX ) {
+		unsigned long alt_rh_off;
+
+		alt_rh_off = __get_free_page(GFP_KERNEL | GFP_DMA);
+		atomic_inc(&virt_to_page(alt_rh_off)->count);
+		set_bit(PG_locked, &virt_to_page(alt_rh_off)->flags);
+
+		dev_priv->ring.head = (__volatile__ u32 *) alt_rh_off;
+		*dev_priv->ring.head = cur_read_ptr;
+		rptr_addr = __pa( dev_priv->ring.head );
+	} else
+		rptr_addr = dev_priv->ring_rptr->offset;
+
+	RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR, rptr_addr);
 
 	/* Set ring buffer size */
 	RADEON_WRITE( RADEON_CP_RB_CNTL, dev_priv->ring.size_l2qw );
@@ -836,6 +855,19 @@
 			DO_IOREMAPFREE( dev_priv->agp_textures, dev );
 		}
 #endif
+
+		/*
+		 * Free the page we grabbed for RPTR_ADDR.
+		 */
+		if ( dev->agp->agp_info.chipset == INTEL_460GX ) {
+			unsigned long alt_rh_off =
+				(unsigned long) dev_priv->ring.head;
+
+			atomic_dec(&virt_to_page(alt_rh_off)->count);
+			clear_bit(PG_locked, &virt_to_page(alt_rh_off)->flags);
+			wake_up(&virt_to_page(alt_rh_off)->wait);
+			free_page(alt_rh_off);
+		}
 
 		drm_free( dev->dev_private, sizeof(drm_radeon_private_t),
 			  DRM_MEM_DRIVER );
diff -Nru a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
--- a/fs/proc/proc_misc.c	Thu Oct  9 15:19:45 2003
+++ b/fs/proc/proc_misc.c	Thu Oct  9 15:19:45 2003
@@ -36,6 +36,7 @@
 #include <linux/init.h>
 #include <linux/smp_lock.h>
 #include <linux/seq_file.h>
+#include <linux/hugetlb.h>
 #include <linux/sysrq.h>
 
 #include <asm/uaccess.h>
@@ -209,6 +210,8 @@
 		K(i.freeram-i.freehigh),
 		K(i.totalswap),
 		K(i.freeswap));
+
+	len += hugetlb_report_meminfo(page + len);
 
 	return proc_calc_metrics(page, start, off, count, eof, len);
 #undef B
diff -Nru a/include/linux/fs.h b/include/linux/fs.h
--- a/include/linux/fs.h	Thu Oct  9 15:19:45 2003
+++ b/include/linux/fs.h	Thu Oct  9 15:19:45 2003
@@ -246,7 +246,7 @@
 	/* First cache line: */
 	struct buffer_head *b_next;	/* Hash queue list */
 	unsigned long b_blocknr;	/* block number */
-	unsigned short b_size;		/* block size */
+	unsigned int b_size;		/* block size */
 	unsigned short b_list;		/* List that this buffer appears */
 	kdev_t b_dev;			/* device (B_FREE = free) */
 
diff -Nru a/include/linux/mm.h b/include/linux/mm.h
--- a/include/linux/mm.h	Thu Oct  9 15:19:45 2003
+++ b/include/linux/mm.h	Thu Oct  9 15:19:45 2003
@@ -103,6 +103,9 @@
 #define VM_DONTCOPY	0x00020000      /* Do not copy this vma on fork */
 #define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
 #define VM_RESERVED	0x00080000	/* Don't unmap it from swap_out */
+#define VM_WRITECOMBINED 0x00100000	/* Write-combined */
+#define VM_NONCACHED	0x00200000	/* Noncached access */
+#define VM_HUGETLB	0x00400000  	/* Huge tlb Page*/
 
 #ifndef VM_STACK_FLAGS
 #define VM_STACK_FLAGS	0x00000177
diff -Nru a/include/linux/sysctl.h b/include/linux/sysctl.h
--- a/include/linux/sysctl.h	Thu Oct  9 15:19:45 2003
+++ b/include/linux/sysctl.h	Thu Oct  9 15:19:45 2003
@@ -156,6 +156,7 @@
 	VM_MAPPED_RATIO=20,     /* amount of unfreeable pages that triggers swapout */
 	VM_LAPTOP_MODE=21,	/* kernel in laptop flush mode */
 	VM_BLOCK_DUMP=22,	/* dump fs activity to log */
+	VM_HUGETLB_PAGES=23,	/* int: Number of available Huge Pages */
 };
 
 
diff -Nru a/init/main.c b/init/main.c
--- a/init/main.c	Thu Oct  9 15:19:45 2003
+++ b/init/main.c	Thu Oct  9 15:19:45 2003
@@ -291,6 +291,7 @@
 
 
 extern void setup_arch(char **);
+extern void __init build_all_zonelists(void);
 extern void cpu_idle(void);
 
 unsigned long wait_init_idle;
@@ -361,6 +362,7 @@
 	lock_kernel();
 	printk(linux_banner);
 	setup_arch(&command_line);
+	build_all_zonelists();
 	printk("Kernel command line: %s\n", saved_command_line);
 	parse_options(command_line);
 	trap_init();
diff -Nru a/kernel/sysctl.c b/kernel/sysctl.c
--- a/kernel/sysctl.c	Thu Oct  9 15:19:45 2003
+++ b/kernel/sysctl.c	Thu Oct  9 15:19:45 2003
@@ -31,6 +31,7 @@
 #include <linux/sysrq.h>
 #include <linux/highuid.h>
 #include <linux/swap.h>
+#include <linux/hugetlb.h>
 
 #include <asm/uaccess.h>
 
@@ -312,6 +313,10 @@
 	 &laptop_mode, sizeof(int), 0644, NULL, &proc_dointvec},
 	{VM_BLOCK_DUMP, "block_dump",
 	 &block_dump, sizeof(int), 0644, NULL, &proc_dointvec},
+#ifdef CONFIG_HUGETLB_PAGE
+	{VM_HUGETLB_PAGES, "nr_hugepages", &htlbpage_max, sizeof(int), 0644, NULL,
+	&hugetlb_sysctl_handler},
+#endif
 	{0}
 };
 
diff -Nru a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c	Thu Oct  9 15:19:45 2003
+++ b/mm/memory.c	Thu Oct  9 15:19:45 2003
@@ -37,6 +37,7 @@
  */
 
 #include <linux/mm.h>
+#include <linux/hugetlb.h>
 #include <linux/mman.h>
 #include <linux/swap.h>
 #include <linux/smp_lock.h>
@@ -121,7 +122,7 @@
 	pmd = pmd_offset(dir, 0);
 	pgd_clear(dir);
 	for (j = 0; j < PTRS_PER_PMD ; j++) {
-		prefetchw(pmd+j+(PREFETCH_STRIDE/16));
+		prefetchw(pmd + j + PREFETCH_STRIDE/sizeof(*pmd));
 		free_one_pmd(pmd+j);
 	}
 	pmd_free(pmd);
@@ -181,6 +182,9 @@
 	unsigned long end = vma->vm_end;
 	unsigned long cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
 
+	if (is_vm_hugetlb_page(vma))
+		return copy_hugetlb_page_range(dst, src, vma);
+
 	src_pgd = pgd_offset(src, address)-1;
 	dst_pgd = pgd_offset(dst, address)-1;
 
@@ -473,6 +477,10 @@
 		if ( !vma || (pages && vma->vm_flags & VM_IO) || !(flags & vma->vm_flags) )
 			return i ? : -EFAULT;
 
+		if (is_vm_hugetlb_page(vma)) {
+			i = follow_hugetlb_page(mm, vma, pages, vmas, &start, &len, i);
+			continue;
+		}
 		spin_lock(&mm->page_table_lock);
 		do {
 			struct page *map;
@@ -1370,6 +1378,9 @@
 
 	current->state = TASK_RUNNING;
 	pgd = pgd_offset(mm, address);
+
+	if (is_vm_hugetlb_page(vma))
+		return 0;	/* mapping truncation does this. */
 
 	/*
 	 * We need the page table lock to synchronize with kswapd
diff -Nru a/mm/page_alloc.c b/mm/page_alloc.c
--- a/mm/page_alloc.c	Thu Oct  9 15:19:45 2003
+++ b/mm/page_alloc.c	Thu Oct  9 15:19:45 2003
@@ -49,11 +49,11 @@
 /*
  * Temporary debugging check.
  */
-#define BAD_RANGE(zone, page)						\
-(									\
-	(((page) - mem_map) >= ((zone)->zone_start_mapnr+(zone)->size))	\
-	|| (((page) - mem_map) < (zone)->zone_start_mapnr)		\
-	|| ((zone) != page_zone(page))					\
+#define BAD_RANGE(zone, page)						     \
+(									     \
+	(((page) - mem_map) >= ((zone)->zone_start_mapnr+(zone)->size)) \
+	|| (((page) - mem_map) < (zone)->zone_start_mapnr)		     \
+	|| ((zone) != page_zone(page))					     \
 )
 
 /*
@@ -574,7 +574,7 @@
  		unsigned long nr, total, flags;
 
 		total = 0;
-		if (zone->size) {
+		if (zone->realsize) {
 			spin_lock_irqsave(&zone->lock, flags);
 		 	for (order = 0; order < MAX_ORDER; order++) {
 				head = &(zone->free_area + order)->free_list;
@@ -606,13 +606,44 @@
 /*
  * Builds allocation fallback zone lists.
  */
-static inline void build_zonelists(pg_data_t *pgdat)
+static int __init build_zonelists_node(pg_data_t *pgdat, zonelist_t *zonelist, int j, int k)
 {
-	int i, j, k;
+	zone_t *zone;
+	switch (k) {
+	default:
+		BUG();
+		/*
+		 * fallthrough:
+		 */
+	case ZONE_HIGHMEM:
+		zone = pgdat->node_zones + ZONE_HIGHMEM;
+		if (zone->realsize) {
+#ifndef CONFIG_HIGHMEM
+			BUG();
+#endif
+			zonelist->zones[j++] = zone;
+		}
+	case ZONE_NORMAL:
+		zone = pgdat->node_zones + ZONE_NORMAL;
+		if (zone->realsize)
+			zonelist->zones[j++] = zone;
+	case ZONE_DMA:
+		zone = pgdat->node_zones + ZONE_DMA;
+		if (zone->realsize)
+			zonelist->zones[j++] = zone;
+	}
+
+	return j;
+}
+
+static void __init build_zonelists(pg_data_t *pgdat)
+{
+	int i, j, k, node, local_node;
 
+	local_node = pgdat->node_id;
+	printk("Building zonelist for node : %d\n", local_node);
 	for (i = 0; i <= GFP_ZONEMASK; i++) {
 		zonelist_t *zonelist;
-		zone_t *zone;
 
 		zonelist = pgdat->node_zonelists + i;
 		memset(zonelist, 0, sizeof(*zonelist));
@@ -624,33 +655,32 @@
 		if (i & __GFP_DMA)
 			k = ZONE_DMA;
 
-		switch (k) {
-			default:
-				BUG();
-			/*
-			 * fallthrough:
-			 */
-			case ZONE_HIGHMEM:
-				zone = pgdat->node_zones + ZONE_HIGHMEM;
-				if (zone->size) {
-#ifndef CONFIG_HIGHMEM
-					BUG();
-#endif
-					zonelist->zones[j++] = zone;
-				}
-			case ZONE_NORMAL:
-				zone = pgdat->node_zones + ZONE_NORMAL;
-				if (zone->size)
-					zonelist->zones[j++] = zone;
-			case ZONE_DMA:
-				zone = pgdat->node_zones + ZONE_DMA;
-				if (zone->size)
-					zonelist->zones[j++] = zone;
-		}
+		j = build_zonelists_node(pgdat, zonelist, j, k);
+		/*
+		 * Now we build the zonelist so that it contains the zones
+		 * of all the other nodes.
+		 * We don't want to pressure a particular node, so when
+		 * building the zones for node N, we make sure that the
+		 * zones coming right after the local ones are those from
+		 * node N+1 (modulo the number of nodes)
+		 */
+		for (node = local_node + 1; node < numnodes; node++)
+			j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
+		for (node = 0; node < local_node; node++)
+			j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
+
 		zonelist->zones[j++] = NULL;
 	} 
 }
 
+void __init build_all_zonelists(void)
+{
+	int i;
+
+	for(i = 0 ; i < numnodes ; i++)
+		build_zonelists(NODE_DATA(i));
+}
+
 /*
  * Helper functions to size the waitqueue hash table.
  * Essentially these want to choose hash table sizes sufficiently
@@ -693,6 +723,31 @@
 	return ffz(~size);
 }
 
+static unsigned long memmap_init(struct page *start, struct page *end,
+	int zone, unsigned long start_paddr, int highmem) 
+{
+	struct page *page;
+
+	for (page = start; page < end; page++) {
+		set_page_zone(page, zone);
+		set_page_count(page, 0);
+		SetPageReserved(page);
+		INIT_LIST_HEAD(&page->list);
+		if (!highmem)
+			set_page_address(page, __va(start_paddr));
+		start_paddr += PAGE_SIZE;
+	}
+	return start_paddr;
+}
+
+#ifdef HAVE_ARCH_MEMMAP_INIT
+#define MEMMAP_INIT(start, end, zone, paddr, highmem) \
+	arch_memmap_init(memmap_init, start, end, zone, paddr, highmem)
+#else
+#define MEMMAP_INIT(start, end, zone, paddr, highmem) \
+	memmap_init(start, end, zone, paddr, highmem)
+#endif
+
 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
 
 /*
@@ -714,10 +769,8 @@
 		BUG();
 
 	totalpages = 0;
-	for (i = 0; i < MAX_NR_ZONES; i++) {
-		unsigned long size = zones_size[i];
-		totalpages += size;
-	}
+	for (i = 0; i < MAX_NR_ZONES; i++)
+		totalpages += zones_size[i];
 	realtotalpages = totalpages;
 	if (zholes_size)
 		for (i = 0; i < MAX_NR_ZONES; i++)
@@ -726,7 +779,7 @@
 	printk("On node %d totalpages: %lu\n", nid, realtotalpages);
 
 	/*
-	 * Some architectures (with lots of mem and discontinous memory
+	 * Some architectures (with lots of mem and discontiguous memory
 	 * maps) have to search for a good mem_map area:
 	 * For discontigmem, the conceptual mem map array starts from 
 	 * PAGE_OFFSET, we need to align the actual array onto a mem map 
@@ -739,7 +792,7 @@
 			MAP_ALIGN((unsigned long)lmem_map - PAGE_OFFSET));
 	}
 	*gmap = pgdat->node_mem_map = lmem_map;
-	pgdat->node_size = totalpages;
+	pgdat->node_size = 0;
 	pgdat->node_start_paddr = zone_start_paddr;
 	pgdat->node_start_mapnr = (lmem_map - mem_map);
 	pgdat->nr_zones = 0;
@@ -756,7 +809,7 @@
 		if (zholes_size)
 			realsize -= zholes_size[j];
 
-		printk("zone(%lu): %lu pages.\n", j, size);
+		printk("zone(%lu): %lu pages.\n", j, realsize);
 		zone->size = size;
 		zone->realsize = realsize;
 		zone->name = zone_names[j];
@@ -767,6 +820,7 @@
 		 zone->nr_active_pages = zone->nr_inactive_pages = 0;
 
 
+		pgdat->node_size += realsize;
 		if (!size)
 			continue;
 
@@ -827,16 +881,10 @@
 		 * up by free_all_bootmem() once the early boot process is
 		 * done. Non-atomic initialization, single-pass.
 		 */
-		for (i = 0; i < size; i++) {
-			struct page *page = mem_map + offset + i;
-			set_page_zone(page, nid * MAX_NR_ZONES + j);
-			set_page_count(page, 0);
-			SetPageReserved(page);
-			INIT_LIST_HEAD(&page->list);
-			if (j != ZONE_HIGHMEM)
-				set_page_address(page, __va(zone_start_paddr));
-			zone_start_paddr += PAGE_SIZE;
-		}
+		zone_start_paddr = MEMMAP_INIT(mem_map + offset,
+				mem_map + offset + size,
+				nid * MAX_NR_ZONES + j, zone_start_paddr,
+				(j == ZONE_HIGHMEM ? 1 : 0));
 
 		offset += size;
 		for (i = 0; ; i++) {
@@ -877,7 +925,6 @@
 			  (unsigned long *) alloc_bootmem_node(pgdat, bitmap_size);
 		}
 	}
-	build_zonelists(pgdat);
 }
 
 void __init free_area_init(unsigned long *zones_size)